# Print tibbles in full: disable row and column truncation in console output.
options(tibble.print_max = Inf, tibble.print_min = Inf, tibble.width = Inf)

# Mean accuracy per task, with bootstrapped CI bounds from ci.low()/ci.high().
agr <- d %>%
  group_by(Task) %>%
  reframe(MeanAccuracy = mean(Accuracy),
          CILow = ci.low(Accuracy),
          CIHigh = ci.high(Accuracy)) %>%
  mutate(YMin = MeanAccuracy - CILow,
         YMax = MeanAccuracy + CIHigh)

dodge <- position_dodge(0.9)

# Bar chart of task-level accuracy with error bars.
ggplot(agr, aes(x = Task, y = MeanAccuracy, fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  guides(fill = "none")
# Mean accuracy per Task x WhoseList cell, with bootstrapped CIs.
# BUG FIX: the original used group_by() + mutate(), which keeps one row per
# trial; with geom_bar(stat = "identity") the duplicated per-cell means are
# STACKED, inflating bar heights to n x mean. reframe() collapses to one row
# per cell, matching the task-level plot above.
agr <- d %>%
  group_by(Task, WhoseList) %>%
  reframe(MeanAccuracy = mean(Accuracy),
          CILow = ci.low(Accuracy),
          CIHigh = ci.high(Accuracy)) %>%
  mutate(YMin = MeanAccuracy - CILow,
         YMax = MeanAccuracy + CIHigh)
dodge = position_dodge(.9)
ggplot(data=agr, aes(x=WhoseList,y=MeanAccuracy,fill=Task)) +
  geom_bar(position=dodge,stat="identity") +
  geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25,position=position_dodge(0.9)) +
  guides(fill = "none")
# Mean accuracy per Task x Word cell, faceted by word.
# BUG FIX: as above, group_by() + mutate() left one row per trial and
# geom_bar(stat = "identity") stacked the duplicated means; reframe()
# aggregates to one row per Task x Word cell.
agr <- d %>%
  group_by(Task, Word) %>%
  reframe(MeanAccuracy = mean(Accuracy),
          CILow = ci.low(Accuracy),
          CIHigh = ci.high(Accuracy)) %>%
  mutate(YMin = MeanAccuracy - CILow,
         YMax = MeanAccuracy + CIHigh)
dodge = position_dodge(.9)
ggplot(data=agr, aes(x=Task,y=MeanAccuracy,fill=Task)) +
  geom_bar(position=dodge,stat="identity") +
  facet_wrap(~Word,ncol=10) +
  geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25,position=position_dodge(0.9)) +
  guides(fill = "none")
# Per-word mean accuracy in each task, then an equally weighted combination
# of the two task accuracies, sorted best-first.
df_highest_accurate <- d %>%
  group_by(Word, Task) %>%
  summarize(MeanAccuracy = mean(Accuracy, na.rm = TRUE), .groups = "drop") %>%
  # pivot_wider() replaces the superseded spread(); one column per Task.
  pivot_wider(names_from = Task, values_from = MeanAccuracy) %>%
  mutate(CombinedAccuracy = 0.5 * Concrete + 0.5 * Valence) %>% # equal-weight mean of the two tasks
  arrange(desc(CombinedAccuracy))
# View(df_highest_accurate)
# Per-word score: unweighted average of the word's mean accuracy in the
# Concrete task and in the Valence task.
highest <- d %>%
  group_by(Word) %>%
  summarize(
    highestAccuracy = 0.5 * mean(Accuracy[Task == "Concrete"], na.rm = TRUE) +
      0.5 * mean(Accuracy[Task == "Valence"], na.rm = TRUE)
  )

dodge <- position_dodge(0.9)

# All words, ordered by combined accuracy.
ggplot(highest,
       aes(x = reorder(Word, highestAccuracy), y = highestAccuracy, fill = Word)) +
  geom_bar(position = dodge, stat = "identity") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  guides(fill = "none")

# Keep only words whose combined accuracy exceeds 85%.
highest <- highest %>%
  filter(highestAccuracy > .85)
nrow(highest)
## [1] 53
dodge <- position_dodge(0.9)

# Surviving words, alphabetical x-axis this time.
ggplot(highest, aes(x = Word, y = highestAccuracy, fill = Word)) +
  geom_bar(position = dodge, stat = "identity") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  guides(fill = "none")

# Re-attach item metadata (concreteness-valence cell, source list); distinct()
# collapses the trial-level duplicates the join introduces.
highest <- inner_join(highest, d, by = "Word")[, c("Word", "ConcValCombo", "highestAccuracy", "WhoseList")] %>%
  distinct()

dodge <- position_dodge(0.9)

# Same bars, colored by concreteness-valence cell (legend kept).
ggplot(highest, aes(x = Word, y = highestAccuracy, fill = ConcValCombo)) +
  geom_bar(position = dodge, stat = "identity") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

# How many surviving entries per concreteness-valence cell?
table(highest$ConcValCombo)
##
## abstract-negative abstract-positive concrete-negative concrete-positive
## 20 21 19 12
# One column per concreteness-valence cell, listing its surviving words,
# rendered as a styled HTML table.
highest %>%
  group_by(ConcValCombo) %>%
  summarise(Word_List = paste(Word, collapse = ", ")) %>%
  pivot_wider(names_from = ConcValCombo, values_from = Word_List) %>%
  knitr::kable() %>%
  kable_styling(bootstrap_options = c("striped", "hover"),
                full_width = TRUE,
                position = "center")
| abstract-negative | abstract-positive | concrete-negative | concrete-positive |
|---|---|---|---|
| abhor, annoy, criticize, despise, despise, disappoint, dislike, dislike, distrust, distrust, doubt, dread, hate, hate, hate, loathe, loathe, scorn, smack, violate | admire, admire, admired, adore, adore, care, cherish, encourage, encourage, enjoy, enlighten, enlighten, esteem, fulfill, hope, imagine, impress, praise, respect, running, sail | beware, break, die, disappoint, discourage, distrust, exploit, kill, kill, murder, puke, slaughter, spit, spit, stab, stab, violate, vomit, vomit | behold, build, consider, cuddle, decorate, feeling, hope, hug, kiss, kiss, qualify, smile |
# Same pipeline as `highest`, but weighting the Concrete task more heavily
# (0.7 Concrete vs 0.3 Valence).
highest.conc <- d %>%
  group_by(Word) %>%
  summarize(
    highestAccuracy = 0.7 * mean(Accuracy[Task == "Concrete"], na.rm = TRUE) +
      0.3 * mean(Accuracy[Task == "Valence"], na.rm = TRUE)
  )

dodge <- position_dodge(0.9)

# All words, ordered by the concrete-weighted score.
ggplot(highest.conc,
       aes(x = reorder(Word, highestAccuracy), y = highestAccuracy, fill = Word)) +
  geom_bar(position = dodge, stat = "identity") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  guides(fill = "none")

# Same 85% cutoff as before.
highest.conc <- highest.conc %>%
  filter(highestAccuracy > .85)

dodge <- position_dodge(0.9)
ggplot(highest.conc, aes(x = Word, y = highestAccuracy, fill = Word)) +
  geom_bar(position = dodge, stat = "identity") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  guides(fill = "none")

# Re-attach item metadata and de-duplicate trial rows.
highest.conc <- inner_join(highest.conc, d, by = "Word")[, c("Word", "ConcValCombo", "highestAccuracy", "WhoseList")] %>%
  distinct()

dodge <- position_dodge(0.9)

# Bars colored by concreteness-valence cell.
ggplot(highest.conc, aes(x = Word, y = highestAccuracy, fill = ConcValCombo)) +
  geom_bar(position = dodge, stat = "identity") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

# Cell counts for the concrete-weighted selection.
table(highest.conc$ConcValCombo)
##
## abstract-negative abstract-positive concrete-negative concrete-positive
## 13 19 16 10
# One column per concreteness-valence cell, listing the concrete-weighted
# selection's words, rendered as a styled HTML table.
highest.conc %>%
  group_by(ConcValCombo) %>%
  summarise(Word_List = paste(Word, collapse = ", ")) %>%
  pivot_wider(names_from = ConcValCombo, values_from = Word_List) %>%
  knitr::kable() %>%
  kable_styling(bootstrap_options = c("striped", "hover"),
                full_width = TRUE,
                position = "center")
| abstract-negative | abstract-positive | concrete-negative | concrete-positive |
|---|---|---|---|
| abhor, annoy, criticize, despise, despise, dislike, dislike, displease, distrust, distrust, loathe, loathe, scorn | admire, admire, admired, adore, adore, believe, cherish, enjoy, enlighten, enlighten, esteem, fulfill, hope, imagine, praise, respect, running, sail, zing | beware, bleed, break, discourage, distrust, exploit, kill, kill, murder, puke, spit, spit, stab, stab, vomit, vomit | consider, cuddle, hope, hug, justify, kiss, kiss, qualify, smile, swing |
If we neutralize the difference between the two tasks' accuracies, we can control for accuracy-related confounds in the reaction time analysis.
# Per-word absolute accuracy difference between the two tasks. Words with a
# small difference behave comparably across tasks, which helps control for
# accuracy confounds in the RT analyses below.
diff <- d %>%
  filter(!is.na(Accuracy)) %>%             # drop trials with missing Accuracy
  group_by(Word, Task) %>%
  summarise(MeanAccuracy = mean(Accuracy),
            .groups = "drop") %>%          # explicit: silences the grouping message
  # pivot_wider() replaces the superseded spread(); Concrete/Valence columns.
  pivot_wider(names_from = Task, values_from = MeanAccuracy) %>%
  mutate(Difference = abs(Concrete - Valence)) %>%
  arrange(Difference)                      # smallest cross-task difference first
## `summarise()` has grouped output by 'Word'. You can override using the
## `.groups` argument.
head(40) # Get the top 10 words with the smallest difference
## [1] 40
# add back in concvalcombo
# Re-attach ConcValCombo via a join on Word, keep only the analysis columns,
# and de-duplicate the trial-level rows the join introduces. Note a word can
# appear in more than one cell, so diff may gain rows here.
diff <- inner_join(diff, d, by ="Word")[,c("Word","ConcValCombo","Difference","Concrete","Valence")] %>% distinct()
# One column per concreteness-valence cell, listing words ordered by
# smallest cross-task accuracy difference, rendered as a styled HTML table.
diff %>%
  group_by(ConcValCombo) %>%
  summarise(Word_List = paste(Word, collapse = ", ")) %>%
  pivot_wider(names_from = ConcValCombo, values_from = Word_List) %>%
  knitr::kable() %>%
  kable_styling(bootstrap_options = c("striped", "hover"),
                full_width = TRUE,
                position = "center")
| abstract-negative | abstract-positive | concrete-negative | concrete-positive |
|---|---|---|---|
| fall, annoy, condemn, distrust, criticize, envy, scorn, obsess, loathe, abhor, manipulate, violate, resent, despise, succumb, degrade, crank, doubt, hurl, smack, disown, dislike, languish, worry, crushed, drool, snore, whip, hate, disappoint, dread, intimidate, profane, deceive, displease, look down on, meth, mourn, betray, neglect, frown, suffer, irritate, molest | esteem, running, excite, wish, inspire, hope, lick, enlighten, bathe, cherish, excell, embolden, respect, admire, flourish, fulfill, sail, sing, believe, adore, enjoy, admired, empower, embelish, praise, revitalize, laugh, encourage, innovate, imagine, breathe, caress, impress, care, zing, appreciate, relieve, check, talk, harmonize, validate, love, thrive, improve | befall, weep, distrust, break, scratch, envy, vomit, barf, discourage, impose, beware, spit, violate, carjack, slap, exploit, smash, murder, bleed, kill, rot, puke, stab, disappoint, punch, piss, die, slaughter, strangle, suffocate, choke, convict, demolish, pollute, steal, destroy, irritate, perish, infect, presume, fail, bore | consider, wish, kiss, cook, eat, hope, build, qualify, bathe, hug, swing, cuddle, repair, sing, dance, smile, behold, feeling, sleep, smooch, laugh, construct, decorate, forgive, justify, specialize, caress, lift, soothe, reach, embellish, play, deliver, clean, shine, heal, support, complete, relax, rejuvenate, improve, explore, help, adorn, prosper |
dodge <- position_dodge(0.9)

# Words ordered by cross-task accuracy difference, colored by cell.
ggplot(diff,
       aes(x = reorder(Word, Difference), y = Difference, fill = ConcValCombo)) +
  geom_bar(position = dodge, stat = "identity") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

# Row count (word x cell combinations, not unique words).
nrow(diff)
## [1] 175
# Common words
# Words present in all three candidate sets (diff, highest, highest.conc).
common_words <- Reduce(intersect, list(diff$Word, highest$Word, highest.conc$Word))
print("Common Words:")
## [1] "Common Words:"
print(common_words)
## [1] "consider" "esteem" "running" "annoy" "kiss"
## [6] "distrust" "break" "criticize" "vomit" "hope"
## [11] "discourage" "qualify" "scorn" "beware" "enlighten"
## [16] "spit" "loathe" "abhor" "cherish" "hug"
## [21] "respect" "admire" "despise" "fulfill" "cuddle"
## [26] "exploit" "sail" "murder" "dislike" "adore"
## [31] "enjoy" "kill" "smile" "admired" "puke"
## [36] "stab" "praise" "imagine"
# Unique words in each data frame
# For each set, the words appearing in it but in neither of the other two.
unique_to_diff <- setdiff(diff$Word, union(highest$Word, highest.conc$Word))
unique_to_highest <- setdiff(highest$Word, union(diff$Word, highest.conc$Word))
unique_to_highest.conc <- setdiff(highest.conc$Word, union(diff$Word, highest$Word))
print("Unique to diff:")
## [1] "Unique to diff:"
print(unique_to_diff)
## [1] "befall" "fall" "excite" "wish" "condemn"
## [6] "weep" "scratch" "envy" "inspire" "cook"
## [11] "eat" "barf" "impose" "lick" "obsess"
## [16] "bathe" "manipulate" "carjack" "excell" "slap"
## [21] "resent" "embolden" "flourish" "succumb" "degrade"
## [26] "crank" "hurl" "repair" "sing" "smash"
## [31] "disown" "languish" "dance" "worry" "rot"
## [36] "empower" "crushed" "drool" "snore" "whip"
## [41] "sleep" "embelish" "punch" "revitalize" "smooch"
## [46] "intimidate" "piss" "laugh" "construct" "innovate"
## [51] "profane" "breathe" "deceive" "forgive" "specialize"
## [56] "caress" "strangle" "suffocate" "look down on" "appreciate"
## [61] "choke" "relieve" "check" "lift" "meth"
## [66] "talk" "convict" "harmonize" "mourn" "validate"
## [71] "love" "demolish" "soothe" "betray" "neglect"
## [76] "thrive" "frown" "pollute" "reach" "steal"
## [81] "embellish" "suffer" "destroy" "play" "deliver"
## [86] "irritate" "perish" "clean" "shine" "infect"
## [91] "presume" "heal" "support" "complete" "relax"
## [96] "rejuvenate" "improve" "explore" "fail" "bore"
## [101] "help" "molest" "adorn" "prosper"
# Both character(0) results below show the two high-accuracy selections are
# subsets of diff's word list.
print("Unique to highest:")
## [1] "Unique to highest:"
print(unique_to_highest)
## character(0)
print("Unique to highest.conc:")
## [1] "Unique to highest.conc:"
print(unique_to_highest.conc)
## character(0)
# Words in any data frame but not necessarily all
# Words in at least one set but missing from at least one (union minus the
# three-way intersection).
all_words <- union(union(diff$Word, highest$Word), highest.conc$Word)
any_but_not_all <- setdiff(all_words, common_words)
print("Words in any but not all data frames:")
## [1] "Words in any but not all data frames:"
print(any_but_not_all)
## [1] "befall" "fall" "excite" "wish" "condemn"
## [6] "weep" "scratch" "envy" "inspire" "cook"
## [11] "eat" "barf" "build" "impose" "lick"
## [16] "obsess" "bathe" "manipulate" "violate" "carjack"
## [21] "swing" "excell" "slap" "resent" "embolden"
## [26] "flourish" "succumb" "degrade" "crank" "doubt"
## [31] "hurl" "repair" "smack" "sing" "smash"
## [36] "disown" "believe" "bleed" "languish" "dance"
## [41] "worry" "rot" "behold" "empower" "feeling"
## [46] "crushed" "drool" "snore" "whip" "hate"
## [51] "sleep" "disappoint" "embelish" "dread" "punch"
## [56] "revitalize" "smooch" "intimidate" "piss" "laugh"
## [61] "construct" "encourage" "innovate" "profane" "die"
## [66] "breathe" "deceive" "decorate" "forgive" "justify"
## [71] "slaughter" "specialize" "caress" "impress" "care"
## [76] "strangle" "suffocate" "displease" "zing" "look down on"
## [81] "appreciate" "choke" "relieve" "check" "lift"
## [86] "meth" "talk" "convict" "harmonize" "mourn"
## [91] "validate" "love" "demolish" "soothe" "betray"
## [96] "neglect" "thrive" "frown" "pollute" "reach"
## [101] "steal" "embellish" "suffer" "destroy" "play"
## [106] "deliver" "irritate" "perish" "clean" "shine"
## [111] "infect" "presume" "heal" "support" "complete"
## [116] "relax" "rejuvenate" "improve" "explore" "fail"
## [121] "bore" "help" "molest" "adorn" "prosper"
# Per-task, per-cell, per-word mean accuracy (for the common-words plot below).
# .groups = "drop" makes the result ungrouped and silences the console message
# the original emitted.
agr <- d %>%
  group_by(Task, ConcValCombo, Word) %>%
  summarise(MeanAccuracy = mean(Accuracy), .groups = "drop")
## `summarise()` has grouped output by 'Task', 'ConcValCombo'. You can override
## using the `.groups` argument.
# Restrict the aggregated accuracies to the common words and plot each word's
# per-task accuracy.
common.words <- inner_join(data.frame(Word = common_words), agr, by = "Word")

dodge <- position_dodge(0.9)
ggplot(common.words, aes(x = Task, y = MeanAccuracy, fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  facet_wrap(~Word, ncol = 5)
# Combined (equal-weight) accuracy per word, then accuracy plotted against
# the cross-task difference with a linear trend line.
diff <- diff %>%
  mutate(MeanAccuracy = .5 * Concrete + .5 * Valence)

ggplot(diff, aes(x = Difference, y = MeanAccuracy)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, color = "red") +
  # Label each point with its word, colored by concreteness-valence cell.
  geom_text(aes(label = Word, color = ConcValCombo), vjust = -0.5, hjust = 1.5)
## `geom_smooth()` using formula = 'y ~ x'
head(diff)
## # A tibble: 6 × 6
## # Groups: Word [6]
## Word ConcValCombo Difference Concrete Valence MeanAccuracy
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 befall concrete-negative 0 0.85 0.85 0.85
## 2 consider concrete-positive 0 0.9 0.9 0.9
## 3 esteem abstract-positive 0 0.967 0.967 0.967
## 4 fall abstract-negative 0 0.8 0.8 0.8
## 5 running abstract-positive 0 0.9 0.9 0.9
## 6 annoy abstract-negative 0.00366 0.85 0.854 0.852
# Identify the word(s) with the single best combined accuracy, breaking ties
# by smallest cross-task difference. Done with scalar max/min plus filter so
# ties are kept at each step (diff is grouped by Word here, so per-group
# slice helpers would behave differently).
# Step 1: Find the maximum MeanAccuracy value
max_accuracy <- max(diff$MeanAccuracy, na.rm = TRUE)
# Step 2: Filter the words with the maximum MeanAccuracy
max_accuracy_words <- diff %>%
filter(MeanAccuracy == max_accuracy)
# Step 3: Find the minimum Difference value among the filtered words
min_difference <- min(max_accuracy_words$Difference, na.rm = TRUE)
# Step 4: Filter the words that have both maximum MeanAccuracy and minimum Difference
result_words <- max_accuracy_words %>%
filter(Difference == min_difference)
# Print the result
print(result_words$Word)
## [1] "esteem"
# Select the 40 rows with the highest combined MeanAccuracy.
# (The original comments said "top 5", but the code takes 40.)
top_accuracy_words <- diff %>%
arrange(desc(MeanAccuracy)) %>%
head(40)
# Re-sort those 40 rows by smallest cross-task Difference. Since the input
# already has exactly 40 rows, this head(40) is a no-op beyond the reorder.
bottom_difference_words <- top_accuracy_words %>%
arrange(Difference) %>%
head(40)
# Print all 40 selected rows (n = 40 overrides tibble's 10-row default)
# print(top_accuracy_words)
print(bottom_difference_words,n=40)
## # A tibble: 40 × 6
## # Groups: Word [38]
## Word ConcValCombo Difference Concrete Valence MeanAccuracy
## <chr> <chr> <dbl> <dbl> <dbl> <dbl>
## 1 esteem abstract-positive 0 0.967 0.967 0.967
## 2 consider concrete-positive 0 0.9 0.9 0.9
## 3 running abstract-positive 0 0.9 0.9 0.9
## 4 kiss concrete-positive 0.0291 0.94 0.911 0.925
## 5 distrust concrete-negative 0.0326 0.95 0.917 0.934
## 6 distrust abstract-negative 0.0326 0.95 0.917 0.934
## 7 criticize abstract-negative 0.0333 0.917 0.95 0.933
## 8 break concrete-negative 0.0333 0.85 0.883 0.867
## 9 vomit concrete-negative 0.0390 0.94 0.901 0.920
## 10 hope concrete-positive 0.0475 0.9 0.852 0.876
## 11 hope abstract-positive 0.0475 0.9 0.852 0.876
## 12 scorn abstract-negative 0.0500 0.933 0.983 0.958
## 13 discourage concrete-negative 0.0500 0.95 0.9 0.925
## 14 qualify concrete-positive 0.0500 0.9 0.95 0.925
## 15 beware concrete-negative 0.0500 0.9 0.85 0.875
## 16 enlighten abstract-positive 0.0505 0.948 0.898 0.923
## 17 spit concrete-negative 0.0590 0.96 0.901 0.930
## 18 loathe abstract-negative 0.0598 0.863 0.923 0.893
## 19 cherish abstract-positive 0.0667 0.933 1 0.967
## 20 hug concrete-positive 0.0667 0.933 1 0.967
## 21 abhor abstract-negative 0.0667 0.95 0.883 0.917
## 22 respect abstract-positive 0.0833 0.917 1 0.958
## 23 despise abstract-negative 0.0855 0.889 0.974 0.932
## 24 admire abstract-positive 0.0855 0.872 0.957 0.915
## 25 cuddle concrete-positive 0.1 0.867 0.967 0.917
## 26 exploit concrete-negative 0.1 0.85 0.95 0.9
## 27 sail abstract-positive 0.1 0.85 0.95 0.9
## 28 murder concrete-negative 0.117 0.883 1 0.942
## 29 dislike abstract-negative 0.120 0.863 0.983 0.923
## 30 adore abstract-positive 0.128 0.846 0.974 0.910
## 31 enjoy abstract-positive 0.133 0.867 1 0.933
## 32 kill concrete-negative 0.137 0.829 0.966 0.897
## 33 smile concrete-positive 0.140 0.825 0.965 0.895
## 34 behold concrete-positive 0.150 0.8 0.95 0.875
## 35 feeling concrete-positive 0.150 0.8 0.95 0.875
## 36 stab concrete-negative 0.15 0.817 0.967 0.892
## 37 praise abstract-positive 0.167 0.833 1 0.917
## 38 imagine abstract-positive 0.195 0.975 0.780 0.878
## 39 slaughter concrete-negative 0.2 0.783 0.983 0.883
## 40 care abstract-positive 0.217 0.783 1 0.892
# Distribution of the 40 selected rows over concreteness-valence cells.
table(bottom_difference_words$ConcValCombo)
##
## abstract-negative abstract-positive concrete-negative concrete-positive
## 7 13 11 9
# 38 unique words: per the table above, "distrust" and "hope" each appear in
# two cells, so 40 rows collapse to 38 words.
length(unique(bottom_difference_words$Word))
# Trial-level data restricted to the 38 selected words.
# The join keys are now explicit rather than relying on a silent natural
# join; the original's console message confirmed it matched on exactly
# Word and ConcValCombo.
sub.data <- inner_join(d, bottom_difference_words, by = c("Word", "ConcValCombo"))

# Log RT distributions by task for the selected items.
ggplot(sub.data, aes(LogReactionTime, fill = Task)) +
  geom_density(alpha = .5)
# Sanity check: log RT range over the full dataset (one NA present).
summary(d$LogReactionTime)
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## 0.6931 6.4785 6.7044 6.7677 7.0166 10.8780 1
# Task-level mean accuracy with bootstrapped CIs, selected items only.
agr <- sub.data %>%
  group_by(Task) %>%
  reframe(MeanAccuracy = mean(Accuracy),
          CILow = ci.low(Accuracy),
          CIHigh = ci.high(Accuracy)) %>%
  mutate(YMin = MeanAccuracy - CILow,
         YMax = MeanAccuracy + CIHigh)

dodge <- position_dodge(0.9)
ggplot(agr, aes(x = Task, y = MeanAccuracy, fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  guides(fill = "none")
# Per-word accuracy by task for the selected items.
# BUG FIX: group_by() + mutate() kept one row per trial, so geom_bar(stat =
# "identity") stacked duplicated means; reframe() yields one row per cell,
# as in the task-level aggregation above.
agr <- sub.data %>%
  group_by(Task, Word) %>%
  reframe(MeanAccuracy = mean(Accuracy),
          CILow = ci.low(Accuracy),
          CIHigh = ci.high(Accuracy)) %>%
  mutate(YMin = MeanAccuracy - CILow,
         YMax = MeanAccuracy + CIHigh)
dodge = position_dodge(.9)
ggplot(data=agr, aes(x=Task,y=MeanAccuracy,fill=Task)) +
  geom_bar(position=dodge,stat="identity") +
  facet_wrap(~Word) +
  geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25,position=position_dodge(0.9)) +
  guides(fill = "none")
# Per-word accuracy by task, colored by concreteness-valence cell.
# BUG FIX: same mutate()-instead-of-aggregation defect as above; reframe()
# collapses to one row per Task x Word x ConcValCombo cell so bars are not
# stacked duplicates.
agr <- sub.data %>%
  group_by(Task, Word, ConcValCombo) %>%
  reframe(MeanAccuracy = mean(Accuracy),
          CILow = ci.low(Accuracy),
          CIHigh = ci.high(Accuracy)) %>%
  mutate(YMin = MeanAccuracy - CILow,
         YMax = MeanAccuracy + CIHigh)
dodge = position_dodge(.9)
ggplot(data=agr, aes(x=Task,y=MeanAccuracy,fill=ConcValCombo)) +
  geom_bar(position=dodge,stat="identity") +
  facet_wrap(~Word) +
  geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25,position=position_dodge(0.9))
# Per-word mean log RT by task, with bootstrapped CIs.
# .groups = "drop" is explicit, returning an ungrouped tibble and silencing
# the "grouped output by 'Task'" console message.
agr <- sub.data %>%
  group_by(Task, Word) %>%
  summarize(MeanLogReactionTime = mean(LogReactionTime),
            CILow = ci.low(LogReactionTime),
            CIHigh = ci.high(LogReactionTime),
            .groups = "drop") %>%
  mutate(YMin = MeanLogReactionTime - CILow,
         YMax = MeanLogReactionTime + CIHigh)
# Distribution of per-word mean log RTs by task.
ggplot(agr, aes(x = MeanLogReactionTime, fill = Task)) +
  geom_density(alpha = .4)

# Same data as violins with jittered word-level points.
ggplot(agr, aes(x = Task, y = MeanLogReactionTime, fill = Task)) +
  geom_violin(trim = FALSE, alpha = .4) +
  geom_jitter(shape = 16, position = position_jitter(0.2)) +
  guides(fill = "none")
# Per-word mean raw RT by task, with bootstrapped CIs.
# .groups = "drop" is explicit, silencing the grouping message.
agr <- sub.data %>%
  group_by(Task, Word) %>%
  summarize(MeanReactionTime = mean(ReactionTime),
            CILow = ci.low(ReactionTime),
            CIHigh = ci.high(ReactionTime),
            .groups = "drop") %>%
  mutate(YMin = MeanReactionTime - CILow, YMax = MeanReactionTime + CIHigh)
dodge <- position_dodge(0.9)

# Raw RT per word, faceted, with error bars.
ggplot(agr, aes(x = Task, y = MeanReactionTime, fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  facet_wrap(~Word) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  guides(fill = "none")
# Raw RT range: heavy right tail (max 15835 ms vs median 787 ms) is the
# motivation for analyzing log RT in the models.
summary(sub.data$ReactionTime)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 3.0 634.0 787.0 963.2 1049.0 15835.0
# Mean raw RT per task; the output below shows Concrete ~200 ms slower.
agr <- sub.data %>%
group_by(Task) %>%
summarise(MeanRT = mean(ReactionTime)) %>%
print()
## # A tibble: 2 × 2
## Task MeanRT
## <chr> <dbl>
## 1 Concrete 1064.
## 2 Valence 863.
# Per-word, per-cell mean raw RT with bootstrapped CIs.
# .groups = "drop" is explicit, silencing the grouping message.
agr <- sub.data %>%
  group_by(Task, Word, ConcValCombo) %>%
  summarize(MeanReactionTime = mean(ReactionTime),
            CILow = ci.low(ReactionTime),
            CIHigh = ci.high(ReactionTime),
            .groups = "drop") %>%
  mutate(YMin = MeanReactionTime - CILow, YMax = MeanReactionTime + CIHigh)
dodge <- position_dodge(0.9)

# Raw RT per word, colored by concreteness-valence cell (legend kept).
ggplot(agr, aes(x = Task, y = MeanReactionTime, fill = ConcValCombo)) +
  geom_bar(position = dodge, stat = "identity") +
  facet_wrap(~Word) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge)
Yes — there is a significant main effect of Task on log reaction time (cTask: estimate = -0.159, p < .001; see model output below).
# Mixed model: effect of (centered) Task on log RT, with by-participant and
# by-word random intercepts and Task slopes. Fit on the data frame `center`
# (constructed elsewhere -- not visible in this chunk; presumably a centered
# version of sub.data, TODO confirm).
m = lmer(LogReactionTime ~ cTask + (1+cTask|ID.true) + (1+cTask|Word), data=center)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cTask + (1 + cTask | ID.true) + (1 + cTask |
## Word)
## Data: center
##
## REML criterion at convergence: 4143.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -8.7343 -0.5589 -0.1408 0.4099 8.1678
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID.true (Intercept) 0.101231 0.31817
## cTask 0.149402 0.38653 -0.37
## Word (Intercept) 0.002414 0.04914
## cTask 0.005052 0.07108 0.04
## Residual 0.115898 0.34044
## Number of obs: 5039, groups: ID.true, 138; Word, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.76463 0.03055 163.34426 221.429 < 2e-16 ***
## cTask -0.15851 0.04244 123.66754 -3.735 0.000285 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cTask -0.294
No — there is a main effect of Accuracy on RT (p = .005), but no Accuracy × Task interaction (p = .22).
# Does accuracy modulate the task effect? Adds centered Accuracy and its
# interaction with centered Task; same random-effects structure as above.
m = lmer(LogReactionTime ~ cAccuracy*cTask + (1+cTask|ID.true) + (1+cTask|Word), data=center)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cAccuracy * cTask + (1 + cTask | ID.true) +
## (1 + cTask | Word)
## Data: center
##
## REML criterion at convergence: 4146.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -8.7047 -0.5597 -0.1377 0.4120 8.2087
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID.true (Intercept) 0.103017 0.32096
## cTask 0.148239 0.38502 -0.37
## Word (Intercept) 0.002315 0.04812
## cTask 0.004722 0.06872 0.02
## Residual 0.115772 0.34025
## Number of obs: 5039, groups: ID.true, 138; Word, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.76237 0.03072 162.64180 220.094 < 2e-16 ***
## cAccuracy -0.06500 0.02306 4271.86947 -2.818 0.004853 **
## cTask -0.15912 0.04225 122.19095 -3.766 0.000256 ***
## cAccuracy:cTask -0.05656 0.04608 4240.63111 -1.228 0.219702
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cAccrc cTask
## cAccuracy 0.026
## cTask -0.293 0.004
## cAccrcy:cTs -0.001 0.442 0.039
# Keep only correct trials.
# The original called group_by(ID.true) before filtering, but the filter
# condition is row-wise, so the grouping had no effect beyond returning a
# grouped tibble; it is dropped here.
sub.data.acc.trials.only <- sub.data %>%
  filter(Accuracy == 1)
# Percentage of trials retained after excluding errors.
nrow(sub.data.acc.trials.only)/nrow(sub.data)*100
## [1] 91.7047
# Per-word mean log RT by task, correct trials only.
# .groups = "drop" is explicit, silencing the grouping message.
agr <- sub.data.acc.trials.only %>%
  group_by(Task, Word) %>%
  summarize(MeanLogReactionTime = mean(LogReactionTime),
            CILow = ci.low(LogReactionTime),
            CIHigh = ci.high(LogReactionTime),
            .groups = "drop") %>%
  mutate(YMin = MeanLogReactionTime - CILow,
         YMax = MeanLogReactionTime + CIHigh)
# Distribution of per-word mean log RTs by task (correct trials).
ggplot(agr, aes(x = MeanLogReactionTime, fill = Task)) +
  geom_density(alpha = .4)

# Same data as violins with jittered word-level points.
ggplot(agr, aes(x = Task, y = MeanLogReactionTime, fill = Task)) +
  geom_violin(trim = FALSE, alpha = .4) +
  geom_jitter(shape = 16, position = position_jitter(0.2)) +
  guides(fill = "none")
# Per-word mean raw RT by task, correct trials only.
# .groups = "drop" is explicit, silencing the grouping message.
agr <- sub.data.acc.trials.only %>%
  group_by(Task, Word) %>%
  summarize(MeanReactionTime = mean(ReactionTime),
            CILow = ci.low(ReactionTime),
            CIHigh = ci.high(ReactionTime),
            .groups = "drop") %>%
  mutate(YMin = MeanReactionTime - CILow, YMax = MeanReactionTime + CIHigh)
dodge <- position_dodge(0.9)

# Raw RT per word (correct trials), faceted, with error bars.
ggplot(agr, aes(x = Task, y = MeanReactionTime, fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  facet_wrap(~Word) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  guides(fill = "none")
Yes — restricted to correct trials, the Task effect on log RT remains significant (cTask: estimate = -0.163, p < .001).
# Re-fit the Task model on the correct-trials data. `center` is built
# elsewhere; the output below shows 4621 observations (vs 5039 before), so
# presumably it was recomputed from sub.data.acc.trials.only -- TODO confirm.
m = lmer(LogReactionTime ~ cTask + (1+cTask|ID.true) + (1+cTask|Word), data=center)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cTask + (1 + cTask | ID.true) + (1 + cTask |
## Word)
## Data: center
##
## REML criterion at convergence: 3421.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -10.4176 -0.5635 -0.1419 0.4113 7.2349
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID.true (Intercept) 0.103979 0.32246
## cTask 0.142821 0.37792 -0.12
## Word (Intercept) 0.002379 0.04877
## cTask 0.004695 0.06852 -0.13
## Residual 0.106085 0.32571
## Number of obs: 4621, groups: ID.true, 138; Word, 38
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.77306 0.03002 157.98437 225.590 < 2e-16 ***
## cTask -0.16344 0.04596 79.15910 -3.556 0.000639 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cTask -0.090
No — none of the ConcValCombo × Task interaction terms reach significance (all p > .2), and the model is a singular fit.
# ConcValCombo x Task model, adding by-participant ConcValCombo random slopes.
m = lmer(LogReactionTime ~ ConcValCombo*cTask + (1+cTask+ConcValCombo|ID.true) + (1+cTask|Word), data=center)
## boundary (singular) fit: see help('isSingular')
# NOTE(review): the singular fit above suggests the random-effects structure
# is overparameterized (a random-effect correlation of 0.99 appears in the
# output); consider simplifying, e.g. dropping the ConcValCombo slopes.
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ ConcValCombo * cTask + (1 + cTask + ConcValCombo |
## ID.true) + (1 + cTask | Word)
## Data: center
##
## REML criterion at convergence: 3318.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -7.2670 -0.5773 -0.1458 0.4227 6.5399
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID.true (Intercept) 0.163937 0.40489
## cTask 0.154303 0.39281 -0.37
## ConcValComboabstract-positive 0.005709 0.07556 -0.42 0.94
## ConcValComboconcrete-negative 0.069755 0.26411 -0.74 0.82 0.78
## ConcValComboconcrete-positive 0.036783 0.19179 -0.75 0.75 0.75
## Word (Intercept) 0.001369 0.03700
## cTask 0.003717 0.06097 0.45
## Residual 0.101721 0.31894
##
##
##
##
##
## 0.99
##
##
##
## Number of obs: 4621, groups: ID.true, 138; Word, 38
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 6.775139 0.042204 96.594165 160.533
## ConcValComboabstract-positive 0.019415 0.023638 25.984674 0.821
## ConcValComboconcrete-negative 0.002842 0.035912 50.250300 0.079
## ConcValComboconcrete-positive -0.059998 0.034050 39.513805 -1.762
## cTask -0.143135 0.052998 85.168262 -2.701
## ConcValComboabstract-positive:cTask -0.049168 0.039522 23.156109 -1.244
## ConcValComboconcrete-negative:cTask 0.031433 0.042989 22.753935 0.731
## ConcValComboconcrete-positive:cTask 0.064823 0.051331 24.703763 1.263
## Pr(>|t|)
## (Intercept) < 2e-16 ***
## ConcValComboabstract-positive 0.41892
## ConcValComboconcrete-negative 0.93724
## ConcValComboconcrete-positive 0.08579 .
## cTask 0.00835 **
## ConcValComboabstract-positive:cTask 0.22592
## ConcValComboconcrete-negative:cTask 0.47213
## ConcValComboconcrete-positive:cTask 0.21844
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) CncVlCmbb- CncVlCmbcncrt-n CncVlCmbcncrt-p cTask
## CncVlCmbbs- -0.475
## CncVlCmbcncrt-n -0.743 0.559
## CncVlCmbcncrt-p -0.677 0.580 0.741
## cTask -0.231 0.168 0.400 0.275
## CncVlCmbb-:T -0.090 0.237 0.094 0.117 -0.451
## CncVlCmbcncrt-n:T -0.061 0.120 0.133 0.084 -0.438
## CncVlCmbcncrt-p:T -0.077 0.125 0.080 0.187 -0.340
## CncVlCmbb-:T CncVlCmbcncrt-n:T
## CncVlCmbbs-
## CncVlCmbcncrt-n
## CncVlCmbcncrt-p
## cTask
## CncVlCmbb-:T
## CncVlCmbcncrt-n:T 0.534
## CncVlCmbcncrt-p:T 0.450 0.413
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')